In [18]:
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
import os
from tqdm import tqdm, tqdm_notebook
import random
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.applications import *
from tensorflow.keras.callbacks import *
from tensorflow.keras.initializers import *
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array, array_to_img
from keras.preprocessing import image
from sklearn.metrics import *
from sklearn.metrics import confusion_matrix, classification_report
from typing import List
from tensorflow.keras.applications import ResNet50, VGG16
import imageio
import cv2
from typing import Dict
In [3]:
#seeds:
np.random.seed(123)
tf.random.set_seed(123)
Data Loading¶
In [4]:
!git clone https://github.com/Musadiq-Aliyev/Artist-Identification-using-Deep-Learning-MA
Cloning into 'Artist-Identification-using-Deep-Learning-MA'... remote: Enumerating objects: 16889, done. remote: Counting objects: 100% (5/5), done. remote: Compressing objects: 100% (5/5), done. remote: Total 16889 (delta 0), reused 3 (delta 0), pack-reused 16884 Receiving objects: 100% (16889/16889), 2.17 GiB | 23.04 MiB/s, done. Resolving deltas: 100% (35/35), done. Updating files: 100% (8784/8784), done.
In [5]:
artist_df = pd.read_csv('/content/Artist-Identification-using-Deep-Learning-MA/Data/artists.csv', sep=",")
In [6]:
artist_df.head()
Out[6]:
| id | name | years | genre | nationality | bio | wikipedia | paintings | |
|---|---|---|---|---|---|---|---|---|
| 0 | 0 | Amedeo Modigliani | 1884 - 1920 | Expressionism | Italian | Amedeo Clemente Modigliani (Italian pronunciat... | http://en.wikipedia.org/wiki/Amedeo_Modigliani | 193 |
| 1 | 1 | Vasiliy Kandinskiy | 1866 - 1944 | Expressionism,Abstractionism | Russian | Wassily Wassilyevich Kandinsky (Russian: Васи́... | http://en.wikipedia.org/wiki/Wassily_Kandinsky | 88 |
| 2 | 2 | Diego Rivera | 1886 - 1957 | Social Realism,Muralism | Mexican | Diego María de la Concepción Juan Nepomuceno E... | http://en.wikipedia.org/wiki/Diego_Rivera | 70 |
| 3 | 3 | Claude Monet | 1840 - 1926 | Impressionism | French | Oscar-Claude Monet (; French: [klod mɔnɛ]; 14 ... | http://en.wikipedia.org/wiki/Claude_Monet | 73 |
| 4 | 4 | Rene Magritte | 1898 - 1967 | Surrealism,Impressionism | Belgian | René François Ghislain Magritte (French: [ʁəne... | http://en.wikipedia.org/wiki/René_Magritte | 194 |
In [7]:
images_dir = '/content/Artist-Identification-using-Deep-Learning-MA/Data/resized'
Data Processing¶
In [8]:
# Sort artists by number of paintings
artists = artist_df.sort_values(by=['paintings'], ascending=False)
# Create a dataframe with artists having more than 200 paintings
artists_top = artists[artists['paintings'] >= 200].reset_index()
artists_top = artists_top[['name', 'paintings']]
#artists_top['class_weight'] = max(artists_top.paintings)/artists_top.paintings
artists_top['class_weight'] = artists_top.paintings.sum() / (artists_top.shape[0] * artists_top.paintings)
artists_top
Out[8]:
| name | paintings | class_weight | |
|---|---|---|---|
| 0 | Vincent van Gogh | 877 | 0.445631 |
| 1 | Edgar Degas | 702 | 0.556721 |
| 2 | Pablo Picasso | 439 | 0.890246 |
| 3 | Pierre-Auguste Renoir | 336 | 1.163149 |
| 4 | Albrecht Dürer | 328 | 1.191519 |
| 5 | Paul Gauguin | 311 | 1.256650 |
| 6 | Francisco Goya | 291 | 1.343018 |
| 7 | Rembrandt | 262 | 1.491672 |
| 8 | Alfred Sisley | 259 | 1.508951 |
| 9 | Titian | 255 | 1.532620 |
| 10 | Marc Chagall | 239 | 1.635223 |
In [9]:
# Set class weights - assign higher weights to underrepresented classes
class_weights = artists_top['class_weight'].to_dict()
class_weights
Out[9]:
{0: 0.44563076604125634,
1: 0.5567210567210568,
2: 0.8902464278318493,
3: 1.1631493506493507,
4: 1.1915188470066518,
5: 1.2566501023092662,
6: 1.3430178069353327,
7: 1.491672449687717,
8: 1.5089505089505089,
9: 1.532620320855615,
10: 1.6352225180677062}
In [10]:
# There is some problem recognizing 'Albrecht_Dürer' (don't know why, worth exploring)
# So I'll update this string as directory name to df's
updated_name = "Albrecht_Dürer".replace("_", " ")
artists_top.iloc[4, 0] = updated_name
In [11]:
artists_top_name = artists_top['name'].str.replace(' ', '_').values
In [12]:
artists_top_name
Out[12]:
array(['Vincent_van_Gogh', 'Edgar_Degas', 'Pablo_Picasso',
'Pierre-Auguste_Renoir', 'Albrecht_Dürer', 'Paul_Gauguin',
'Francisco_Goya', 'Rembrandt', 'Alfred_Sisley', 'Titian',
'Marc_Chagall'], dtype=object)
Data Visualization¶
In [13]:
plt.figure(figsize=(18,5))
sns.barplot(x=artist_df['nationality'].value_counts().index,y=artist_df['nationality'].value_counts().values)
plt.title('nationality')
plt.xticks(rotation=75)
plt.ylabel('Rates')
plt.legend(loc=0)
plt.show()
WARNING:matplotlib.legend:No artists with labels found to put in legend. Note that artists whose label start with an underscore are ignored when legend() is called with no argument.
In [14]:
plt.figure(figsize=(18,5))
sns.barplot(x=artist_df['genre'].value_counts().index,
y=artist_df['genre'].value_counts().values)
plt.xlabel('genre')
plt.xticks(rotation=75)
plt.ylabel('Frequency')
plt.title('Show of genre Bar Plot')
plt.show()
In [15]:
fig, axes = plt.subplots(1, 5, figsize=(20,10))
for i in range(5):
random_image = random.choice(os.listdir(os.path.join(images_dir)))
random_image_file = os.path.join(images_dir, random_image)
image = plt.imread(random_image_file)
# Get the shape of the image
height, width, channels = image.shape
artist_name = " ".join(random_image.split("_")[:-1])
axes[i].imshow(image)
axes[i].set_title(artist_name+ f": ( {width} x {height} x {channels})")
axes[i].axis('off')
plt.show()
Train and Test Split¶
In [16]:
# Create a DataFrame to store filename and artist mapping
file_artist_mapping = []
for filename in os.listdir(images_dir):
if filename.endswith(('.png', '.jpg', '.jpeg')):
artist_name = "_".join(filename.split("_")[:-1])
if (artist_name in artists_top_name) or artist_name=='Albrecht_Dürer':
artist_name = artist_name.replace('_',' ')
file_artist_mapping.append({'filename': filename, 'artist': artist_name})
df = pd.DataFrame(file_artist_mapping)
# Define image size and other parameters
img_size = (224, 224)
batch_size = 16
n_classes = artists_top.shape[0]
train_input_shape = (224, 224, 3)
# Create data generators
datagen = ImageDataGenerator(validation_split=0.2,
rescale=1./255.,
shear_range=5,
horizontal_flip=True,
vertical_flip=True,)
# Train generator
train_generator = datagen.flow_from_dataframe(
dataframe=df,
directory=images_dir,
x_col='filename',
y_col='artist',
target_size=img_size,
batch_size=batch_size,
class_mode='categorical',
subset='training'
)
# Validation generator
validation_generator = datagen.flow_from_dataframe(
dataframe=df,
directory=images_dir,
x_col='filename',
y_col='artist',
target_size=img_size,
batch_size=batch_size,
class_mode='categorical',
subset='validation'
)
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = validation_generator.n//validation_generator.batch_size
print("Total number of batches =", STEP_SIZE_TRAIN, "and", STEP_SIZE_VALID)
Found 3440 validated image filenames belonging to 11 classes. Found 859 validated image filenames belonging to 11 classes. Total number of batches = 215 and 53
Data Augmentation Visualization¶
In [19]:
def transformed_image_visualization(images_dir: str, artists_top_name: List[str], datagen: ImageDataGenerator) -> None:
"""
Display an original image and its randomly transformed version for a random artist.
Parameters:
- images_dir (str): Directory path containing images.
- artists_top_name (List[str]): List of artist names.
- datagen (ImageDataGenerator): ImageDataGenerator for data augmentation.
Returns:
None
"""
# Create subplots for original and transformed images
fig, axes = plt.subplots(1, 2, figsize=(20, 10))
# Randomly choose an artist and an image for visualization
random_artist = random.choice(artists_top_name)
random_image = random.choice(os.listdir(os.path.join(images_dir)))
random_image_file = os.path.join(images_dir, random_image)
# Load and display the original image
image = plt.imread(random_image_file)
axes[0].imshow(image)
axes[0].set_title("An original Image of " + random_artist.replace('_', ' '))
axes[0].axis('off')
# Apply random transformation using the provided ImageDataGenerator
aug_image = datagen.random_transform(image)
# Display the transformed image
axes[1].imshow(aug_image)
axes[1].set_title("A transformed Image of " + random_artist.replace('_', ' '))
axes[1].axis('off')
# Show the plot
plt.show()
In [24]:
transformed_image_visualization(images_dir,artists_top_name,datagen)
In [25]:
transformed_image_visualization(images_dir,artists_top_name,datagen)
Transfer Learning¶
VCG-16¶
In [26]:
def build_custom_model(base_model: Model, n_classes: int, dense_units: list[int] = [512, 16], activation: str = 'relu') -> Model:
"""
Build a custom model on top of a base model.
Parameters:
- base_model (Model): Base model on top of which the custom model will be built.
- n_classes (int): Number of output classes for the final softmax layer.
- dense_units (list[int]): List of units for dense layers. Default is [512, 16].
- activation (str): Activation function for dense layers. Default is 'relu'.
Returns:
Model: The custom model.
"""
# Flatten the output of the base model
x = Flatten()(base_model.output)
# Add dense layers with batch normalization and activation
for units in dense_units:
x = Dense(units, kernel_initializer='he_uniform')(x)
x = BatchNormalization()(x)
x = Activation(activation)(x)
# Output layer with softmax activation for classification
output = Dense(n_classes, activation='softmax')(x)
# Create the custom model
model = Model(inputs=base_model.input, outputs=output)
return model
# Load pre-trained models
base_model_vgg16 = VGG16(weights='imagenet', include_top=False, input_shape=train_input_shape)
for layer in base_model_vgg16.layers:
layer.trainable = True
# Build custom models
model_vgg16 = build_custom_model(base_model_vgg16, n_classes)
# Compile and train VGG16
optimizer_vgg16 = Adam(learning_rate=0.0001)
model_vgg16.compile(loss='categorical_crossentropy',
optimizer=optimizer_vgg16,
metrics=['accuracy'])
early_stop_vgg16 = EarlyStopping(monitor='val_loss', patience=20, verbose=1,
mode='auto', restore_best_weights=True)
history_vgg16 = model_vgg16.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=validation_generator, validation_steps=STEP_SIZE_VALID,
epochs=10,
shuffle=True,
verbose=1,
callbacks=[early_stop_vgg16],
class_weight=class_weights)
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5 58889256/58889256 [==============================] - 0s 0us/step
<ipython-input-26-17c15a29563b>:51: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators. history_vgg16 = model_vgg16.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN,
Epoch 1/10 215/215 [==============================] - 119s 453ms/step - loss: 2.4150 - accuracy: 0.3451 - val_loss: 2.6310 - val_accuracy: 0.2972 Epoch 2/10 215/215 [==============================] - 98s 453ms/step - loss: 2.1115 - accuracy: 0.4616 - val_loss: 2.2789 - val_accuracy: 0.2547 Epoch 3/10 215/215 [==============================] - 98s 457ms/step - loss: 1.9024 - accuracy: 0.5273 - val_loss: 2.6073 - val_accuracy: 0.1462 Epoch 4/10 215/215 [==============================] - 95s 440ms/step - loss: 1.7590 - accuracy: 0.5663 - val_loss: 1.7823 - val_accuracy: 0.4257 Epoch 5/10 215/215 [==============================] - 95s 441ms/step - loss: 1.5827 - accuracy: 0.6142 - val_loss: 1.4909 - val_accuracy: 0.5153 Epoch 6/10 215/215 [==============================] - 98s 457ms/step - loss: 1.4990 - accuracy: 0.6326 - val_loss: 1.5170 - val_accuracy: 0.5106 Epoch 7/10 215/215 [==============================] - 99s 460ms/step - loss: 1.4118 - accuracy: 0.6523 - val_loss: 1.3865 - val_accuracy: 0.5778 Epoch 8/10 215/215 [==============================] - 98s 457ms/step - loss: 1.3151 - accuracy: 0.6738 - val_loss: 1.3680 - val_accuracy: 0.5531 Epoch 9/10 215/215 [==============================] - 95s 440ms/step - loss: 1.2391 - accuracy: 0.6951 - val_loss: 1.4387 - val_accuracy: 0.5307 Epoch 10/10 215/215 [==============================] - 93s 432ms/step - loss: 1.1709 - accuracy: 0.6980 - val_loss: 1.5650 - val_accuracy: 0.5177
Resnet50¶
In [27]:
# Load pre-trained models
base_model_resnet50 = ResNet50(weights='imagenet', include_top=False, input_shape=train_input_shape)
# Build custom models
model_resnet50 = build_custom_model(base_model_resnet50, n_classes)
# Compile and train ResNet50
optimizer_resnet50 = Adam(learning_rate=0.0001)
model_resnet50.compile(loss='categorical_crossentropy',
optimizer=optimizer_resnet50,
metrics=['accuracy'])
early_stop_resnet50 = EarlyStopping(monitor='val_loss', patience=20, verbose=1,
mode='auto', restore_best_weights=True)
history_resnet50 = model_resnet50.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=validation_generator, validation_steps=STEP_SIZE_VALID,
epochs=10,
shuffle=True,
verbose=1,
callbacks=[early_stop_resnet50],
class_weight=class_weights)
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5 94765736/94765736 [==============================] - 1s 0us/step
<ipython-input-27-fc60169746d9>:15: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators. history_resnet50 = model_resnet50.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN,
Epoch 1/10 215/215 [==============================] - 130s 458ms/step - loss: 2.0063 - accuracy: 0.5512 - val_loss: 3.9725 - val_accuracy: 0.0507 Epoch 2/10 215/215 [==============================] - 99s 458ms/step - loss: 1.4997 - accuracy: 0.7567 - val_loss: 2.4622 - val_accuracy: 0.0790 Epoch 3/10 215/215 [==============================] - 96s 448ms/step - loss: 1.3045 - accuracy: 0.7930 - val_loss: 2.6253 - val_accuracy: 0.0979 Epoch 4/10 215/215 [==============================] - 93s 429ms/step - loss: 1.1146 - accuracy: 0.8506 - val_loss: 2.0221 - val_accuracy: 0.3856 Epoch 5/10 215/215 [==============================] - 98s 455ms/step - loss: 0.9828 - accuracy: 0.8718 - val_loss: 1.2390 - val_accuracy: 0.7052 Epoch 6/10 215/215 [==============================] - 93s 434ms/step - loss: 0.8652 - accuracy: 0.8927 - val_loss: 0.9312 - val_accuracy: 0.8208 Epoch 7/10 215/215 [==============================] - 98s 454ms/step - loss: 0.7823 - accuracy: 0.9000 - val_loss: 0.9824 - val_accuracy: 0.7547 Epoch 8/10 215/215 [==============================] - 94s 434ms/step - loss: 0.7090 - accuracy: 0.9125 - val_loss: 0.8625 - val_accuracy: 0.7995 Epoch 9/10 215/215 [==============================] - 94s 435ms/step - loss: 0.6334 - accuracy: 0.9142 - val_loss: 0.7923 - val_accuracy: 0.8302 Epoch 10/10 215/215 [==============================] - 97s 451ms/step - loss: 0.5278 - accuracy: 0.9416 - val_loss: 0.7345 - val_accuracy: 0.8278
Model Evaluation¶
In [28]:
def evaluate(model, generator, verbose=1):
score = model.evaluate_generator(generator, verbose=verbose)
return score[1]
# Assuming you have trained models: model_resnet50, model_vgg16
accuracy_resnet50_train = evaluate(model_resnet50, train_generator)
accuracy_resnet50_cv = evaluate(model_resnet50, validation_generator)
accuracy_vgg16_train = evaluate(model_vgg16, train_generator)
accuracy_vgg16_cv = evaluate(model_vgg16, validation_generator)
print("ResNet50 - Prediction accuracy on train data =", accuracy_resnet50_train)
print("ResNet50 - Prediction accuracy on CV data =", accuracy_resnet50_cv)
print("VGG16 - Prediction accuracy on train data =", accuracy_vgg16_train)
print("VGG16 - Prediction accuracy on CV data =", accuracy_vgg16_cv)
<ipython-input-28-c5c8f516f0c5>:2: UserWarning: `Model.evaluate_generator` is deprecated and will be removed in a future version. Please use `Model.evaluate`, which supports generators. score = model.evaluate_generator(generator, verbose=verbose)
215/215 [==============================] - 68s 315ms/step - loss: 0.3251 - accuracy: 0.9770 54/54 [==============================] - 17s 314ms/step - loss: 0.7195 - accuracy: 0.8463 215/215 [==============================] - 66s 305ms/step - loss: 1.3395 - accuracy: 0.6003 54/54 [==============================] - 18s 336ms/step - loss: 1.5556 - accuracy: 0.5367 ResNet50 - Prediction accuracy on train data = 0.977034866809845 ResNet50 - Prediction accuracy on CV data = 0.8463329672813416 VGG16 - Prediction accuracy on train data = 0.6002907156944275 VGG16 - Prediction accuracy on CV data = 0.5366705656051636
In [29]:
model_vgg16.summary()
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 224, 224, 3)] 0
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
flatten (Flatten) (None, 25088) 0
dense (Dense) (None, 512) 12845568
batch_normalization (Batch (None, 512) 2048
Normalization)
activation (Activation) (None, 512) 0
dense_1 (Dense) (None, 16) 8208
batch_normalization_1 (Bat (None, 16) 64
chNormalization)
activation_1 (Activation) (None, 16) 0
dense_2 (Dense) (None, 11) 187
=================================================================
Total params: 27570763 (105.17 MB)
Trainable params: 27569707 (105.17 MB)
Non-trainable params: 1056 (4.12 KB)
_________________________________________________________________
In [30]:
model_resnet50.summary()
Model: "model_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_2 (InputLayer) [(None, 224, 224, 3)] 0 []
conv1_pad (ZeroPadding2D) (None, 230, 230, 3) 0 ['input_2[0][0]']
conv1_conv (Conv2D) (None, 112, 112, 64) 9472 ['conv1_pad[0][0]']
conv1_bn (BatchNormalizati (None, 112, 112, 64) 256 ['conv1_conv[0][0]']
on)
conv1_relu (Activation) (None, 112, 112, 64) 0 ['conv1_bn[0][0]']
pool1_pad (ZeroPadding2D) (None, 114, 114, 64) 0 ['conv1_relu[0][0]']
pool1_pool (MaxPooling2D) (None, 56, 56, 64) 0 ['pool1_pad[0][0]']
conv2_block1_1_conv (Conv2 (None, 56, 56, 64) 4160 ['pool1_pool[0][0]']
D)
conv2_block1_1_bn (BatchNo (None, 56, 56, 64) 256 ['conv2_block1_1_conv[0][0]']
rmalization)
conv2_block1_1_relu (Activ (None, 56, 56, 64) 0 ['conv2_block1_1_bn[0][0]']
ation)
conv2_block1_2_conv (Conv2 (None, 56, 56, 64) 36928 ['conv2_block1_1_relu[0][0]']
D)
conv2_block1_2_bn (BatchNo (None, 56, 56, 64) 256 ['conv2_block1_2_conv[0][0]']
rmalization)
conv2_block1_2_relu (Activ (None, 56, 56, 64) 0 ['conv2_block1_2_bn[0][0]']
ation)
conv2_block1_0_conv (Conv2 (None, 56, 56, 256) 16640 ['pool1_pool[0][0]']
D)
conv2_block1_3_conv (Conv2 (None, 56, 56, 256) 16640 ['conv2_block1_2_relu[0][0]']
D)
conv2_block1_0_bn (BatchNo (None, 56, 56, 256) 1024 ['conv2_block1_0_conv[0][0]']
rmalization)
conv2_block1_3_bn (BatchNo (None, 56, 56, 256) 1024 ['conv2_block1_3_conv[0][0]']
rmalization)
conv2_block1_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_0_bn[0][0]',
'conv2_block1_3_bn[0][0]']
conv2_block1_out (Activati (None, 56, 56, 256) 0 ['conv2_block1_add[0][0]']
on)
conv2_block2_1_conv (Conv2 (None, 56, 56, 64) 16448 ['conv2_block1_out[0][0]']
D)
conv2_block2_1_bn (BatchNo (None, 56, 56, 64) 256 ['conv2_block2_1_conv[0][0]']
rmalization)
conv2_block2_1_relu (Activ (None, 56, 56, 64) 0 ['conv2_block2_1_bn[0][0]']
ation)
conv2_block2_2_conv (Conv2 (None, 56, 56, 64) 36928 ['conv2_block2_1_relu[0][0]']
D)
conv2_block2_2_bn (BatchNo (None, 56, 56, 64) 256 ['conv2_block2_2_conv[0][0]']
rmalization)
conv2_block2_2_relu (Activ (None, 56, 56, 64) 0 ['conv2_block2_2_bn[0][0]']
ation)
conv2_block2_3_conv (Conv2 (None, 56, 56, 256) 16640 ['conv2_block2_2_relu[0][0]']
D)
conv2_block2_3_bn (BatchNo (None, 56, 56, 256) 1024 ['conv2_block2_3_conv[0][0]']
rmalization)
conv2_block2_add (Add) (None, 56, 56, 256) 0 ['conv2_block1_out[0][0]',
'conv2_block2_3_bn[0][0]']
conv2_block2_out (Activati (None, 56, 56, 256) 0 ['conv2_block2_add[0][0]']
on)
conv2_block3_1_conv (Conv2 (None, 56, 56, 64) 16448 ['conv2_block2_out[0][0]']
D)
conv2_block3_1_bn (BatchNo (None, 56, 56, 64) 256 ['conv2_block3_1_conv[0][0]']
rmalization)
conv2_block3_1_relu (Activ (None, 56, 56, 64) 0 ['conv2_block3_1_bn[0][0]']
ation)
conv2_block3_2_conv (Conv2 (None, 56, 56, 64) 36928 ['conv2_block3_1_relu[0][0]']
D)
conv2_block3_2_bn (BatchNo (None, 56, 56, 64) 256 ['conv2_block3_2_conv[0][0]']
rmalization)
conv2_block3_2_relu (Activ (None, 56, 56, 64) 0 ['conv2_block3_2_bn[0][0]']
ation)
conv2_block3_3_conv (Conv2 (None, 56, 56, 256) 16640 ['conv2_block3_2_relu[0][0]']
D)
conv2_block3_3_bn (BatchNo (None, 56, 56, 256) 1024 ['conv2_block3_3_conv[0][0]']
rmalization)
conv2_block3_add (Add) (None, 56, 56, 256) 0 ['conv2_block2_out[0][0]',
'conv2_block3_3_bn[0][0]']
conv2_block3_out (Activati (None, 56, 56, 256) 0 ['conv2_block3_add[0][0]']
on)
conv3_block1_1_conv (Conv2 (None, 28, 28, 128) 32896 ['conv2_block3_out[0][0]']
D)
conv3_block1_1_bn (BatchNo (None, 28, 28, 128) 512 ['conv3_block1_1_conv[0][0]']
rmalization)
conv3_block1_1_relu (Activ (None, 28, 28, 128) 0 ['conv3_block1_1_bn[0][0]']
ation)
conv3_block1_2_conv (Conv2 (None, 28, 28, 128) 147584 ['conv3_block1_1_relu[0][0]']
D)
conv3_block1_2_bn (BatchNo (None, 28, 28, 128) 512 ['conv3_block1_2_conv[0][0]']
rmalization)
conv3_block1_2_relu (Activ (None, 28, 28, 128) 0 ['conv3_block1_2_bn[0][0]']
ation)
conv3_block1_0_conv (Conv2 (None, 28, 28, 512) 131584 ['conv2_block3_out[0][0]']
D)
conv3_block1_3_conv (Conv2 (None, 28, 28, 512) 66048 ['conv3_block1_2_relu[0][0]']
D)
conv3_block1_0_bn (BatchNo (None, 28, 28, 512) 2048 ['conv3_block1_0_conv[0][0]']
rmalization)
conv3_block1_3_bn (BatchNo (None, 28, 28, 512) 2048 ['conv3_block1_3_conv[0][0]']
rmalization)
conv3_block1_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_0_bn[0][0]',
'conv3_block1_3_bn[0][0]']
conv3_block1_out (Activati (None, 28, 28, 512) 0 ['conv3_block1_add[0][0]']
on)
conv3_block2_1_conv (Conv2 (None, 28, 28, 128) 65664 ['conv3_block1_out[0][0]']
D)
conv3_block2_1_bn (BatchNo (None, 28, 28, 128) 512 ['conv3_block2_1_conv[0][0]']
rmalization)
conv3_block2_1_relu (Activ (None, 28, 28, 128) 0 ['conv3_block2_1_bn[0][0]']
ation)
conv3_block2_2_conv (Conv2 (None, 28, 28, 128) 147584 ['conv3_block2_1_relu[0][0]']
D)
conv3_block2_2_bn (BatchNo (None, 28, 28, 128) 512 ['conv3_block2_2_conv[0][0]']
rmalization)
conv3_block2_2_relu (Activ (None, 28, 28, 128) 0 ['conv3_block2_2_bn[0][0]']
ation)
conv3_block2_3_conv (Conv2 (None, 28, 28, 512) 66048 ['conv3_block2_2_relu[0][0]']
D)
conv3_block2_3_bn (BatchNo (None, 28, 28, 512) 2048 ['conv3_block2_3_conv[0][0]']
rmalization)
conv3_block2_add (Add) (None, 28, 28, 512) 0 ['conv3_block1_out[0][0]',
'conv3_block2_3_bn[0][0]']
conv3_block2_out (Activati (None, 28, 28, 512) 0 ['conv3_block2_add[0][0]']
on)
conv3_block3_1_conv (Conv2 (None, 28, 28, 128) 65664 ['conv3_block2_out[0][0]']
D)
conv3_block3_1_bn (BatchNo (None, 28, 28, 128) 512 ['conv3_block3_1_conv[0][0]']
rmalization)
conv3_block3_1_relu (Activ (None, 28, 28, 128) 0 ['conv3_block3_1_bn[0][0]']
ation)
conv3_block3_2_conv (Conv2 (None, 28, 28, 128) 147584 ['conv3_block3_1_relu[0][0]']
D)
conv3_block3_2_bn (BatchNo (None, 28, 28, 128) 512 ['conv3_block3_2_conv[0][0]']
rmalization)
conv3_block3_2_relu (Activ (None, 28, 28, 128) 0 ['conv3_block3_2_bn[0][0]']
ation)
conv3_block3_3_conv (Conv2 (None, 28, 28, 512) 66048 ['conv3_block3_2_relu[0][0]']
D)
conv3_block3_3_bn (BatchNo (None, 28, 28, 512) 2048 ['conv3_block3_3_conv[0][0]']
rmalization)
conv3_block3_add (Add) (None, 28, 28, 512) 0 ['conv3_block2_out[0][0]',
'conv3_block3_3_bn[0][0]']
conv3_block3_out (Activati (None, 28, 28, 512) 0 ['conv3_block3_add[0][0]']
on)
conv3_block4_1_conv (Conv2 (None, 28, 28, 128) 65664 ['conv3_block3_out[0][0]']
D)
conv3_block4_1_bn (BatchNo (None, 28, 28, 128) 512 ['conv3_block4_1_conv[0][0]']
rmalization)
conv3_block4_1_relu (Activ (None, 28, 28, 128) 0 ['conv3_block4_1_bn[0][0]']
ation)
conv3_block4_2_conv (Conv2 (None, 28, 28, 128) 147584 ['conv3_block4_1_relu[0][0]']
D)
conv3_block4_2_bn (BatchNo (None, 28, 28, 128) 512 ['conv3_block4_2_conv[0][0]']
rmalization)
conv3_block4_2_relu (Activ (None, 28, 28, 128) 0 ['conv3_block4_2_bn[0][0]']
ation)
conv3_block4_3_conv (Conv2 (None, 28, 28, 512) 66048 ['conv3_block4_2_relu[0][0]']
D)
conv3_block4_3_bn (BatchNo (None, 28, 28, 512) 2048 ['conv3_block4_3_conv[0][0]']
rmalization)
conv3_block4_add (Add) (None, 28, 28, 512) 0 ['conv3_block3_out[0][0]',
'conv3_block4_3_bn[0][0]']
conv3_block4_out (Activati (None, 28, 28, 512) 0 ['conv3_block4_add[0][0]']
on)
conv4_block1_1_conv (Conv2 (None, 14, 14, 256) 131328 ['conv3_block4_out[0][0]']
D)
conv4_block1_1_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block1_1_conv[0][0]']
rmalization)
conv4_block1_1_relu (Activ (None, 14, 14, 256) 0 ['conv4_block1_1_bn[0][0]']
ation)
conv4_block1_2_conv (Conv2 (None, 14, 14, 256) 590080 ['conv4_block1_1_relu[0][0]']
D)
conv4_block1_2_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block1_2_conv[0][0]']
rmalization)
conv4_block1_2_relu (Activ (None, 14, 14, 256) 0 ['conv4_block1_2_bn[0][0]']
ation)
conv4_block1_0_conv (Conv2 (None, 14, 14, 1024) 525312 ['conv3_block4_out[0][0]']
D)
conv4_block1_3_conv (Conv2 (None, 14, 14, 1024) 263168 ['conv4_block1_2_relu[0][0]']
D)
conv4_block1_0_bn (BatchNo (None, 14, 14, 1024) 4096 ['conv4_block1_0_conv[0][0]']
rmalization)
conv4_block1_3_bn (BatchNo (None, 14, 14, 1024) 4096 ['conv4_block1_3_conv[0][0]']
rmalization)
conv4_block1_add (Add) (None, 14, 14, 1024) 0 ['conv4_block1_0_bn[0][0]',
'conv4_block1_3_bn[0][0]']
conv4_block1_out (Activati (None, 14, 14, 1024) 0 ['conv4_block1_add[0][0]']
on)
conv4_block2_1_conv (Conv2 (None, 14, 14, 256) 262400 ['conv4_block1_out[0][0]']
D)
conv4_block2_1_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block2_1_conv[0][0]']
rmalization)
conv4_block2_1_relu (Activ (None, 14, 14, 256) 0 ['conv4_block2_1_bn[0][0]']
ation)
conv4_block2_2_conv (Conv2 (None, 14, 14, 256) 590080 ['conv4_block2_1_relu[0][0]']
D)
conv4_block2_2_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block2_2_conv[0][0]']
rmalization)
conv4_block2_2_relu (Activ (None, 14, 14, 256) 0 ['conv4_block2_2_bn[0][0]']
ation)
conv4_block2_3_conv (Conv2 (None, 14, 14, 1024) 263168 ['conv4_block2_2_relu[0][0]']
D)
conv4_block2_3_bn (BatchNo (None, 14, 14, 1024) 4096 ['conv4_block2_3_conv[0][0]']
rmalization)
conv4_block2_add (Add) (None, 14, 14, 1024) 0 ['conv4_block1_out[0][0]',
'conv4_block2_3_bn[0][0]']
conv4_block2_out (Activati (None, 14, 14, 1024) 0 ['conv4_block2_add[0][0]']
on)
conv4_block3_1_conv (Conv2 (None, 14, 14, 256) 262400 ['conv4_block2_out[0][0]']
D)
conv4_block3_1_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block3_1_conv[0][0]']
rmalization)
conv4_block3_1_relu (Activ (None, 14, 14, 256) 0 ['conv4_block3_1_bn[0][0]']
ation)
conv4_block3_2_conv (Conv2 (None, 14, 14, 256) 590080 ['conv4_block3_1_relu[0][0]']
D)
conv4_block3_2_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block3_2_conv[0][0]']
rmalization)
conv4_block3_2_relu (Activ (None, 14, 14, 256) 0 ['conv4_block3_2_bn[0][0]']
ation)
conv4_block3_3_conv (Conv2 (None, 14, 14, 1024) 263168 ['conv4_block3_2_relu[0][0]']
D)
conv4_block3_3_bn (BatchNo (None, 14, 14, 1024) 4096 ['conv4_block3_3_conv[0][0]']
rmalization)
conv4_block3_add (Add) (None, 14, 14, 1024) 0 ['conv4_block2_out[0][0]',
'conv4_block3_3_bn[0][0]']
conv4_block3_out (Activati (None, 14, 14, 1024) 0 ['conv4_block3_add[0][0]']
on)
conv4_block4_1_conv (Conv2 (None, 14, 14, 256) 262400 ['conv4_block3_out[0][0]']
D)
conv4_block4_1_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block4_1_conv[0][0]']
rmalization)
conv4_block4_1_relu (Activ (None, 14, 14, 256) 0 ['conv4_block4_1_bn[0][0]']
ation)
conv4_block4_2_conv (Conv2 (None, 14, 14, 256) 590080 ['conv4_block4_1_relu[0][0]']
D)
conv4_block4_2_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block4_2_conv[0][0]']
rmalization)
conv4_block4_2_relu (Activ (None, 14, 14, 256) 0 ['conv4_block4_2_bn[0][0]']
ation)
conv4_block4_3_conv (Conv2 (None, 14, 14, 1024) 263168 ['conv4_block4_2_relu[0][0]']
D)
conv4_block4_3_bn (BatchNo (None, 14, 14, 1024) 4096 ['conv4_block4_3_conv[0][0]']
rmalization)
conv4_block4_add (Add) (None, 14, 14, 1024) 0 ['conv4_block3_out[0][0]',
'conv4_block4_3_bn[0][0]']
conv4_block4_out (Activati (None, 14, 14, 1024) 0 ['conv4_block4_add[0][0]']
on)
conv4_block5_1_conv (Conv2 (None, 14, 14, 256) 262400 ['conv4_block4_out[0][0]']
D)
conv4_block5_1_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block5_1_conv[0][0]']
rmalization)
conv4_block5_1_relu (Activ (None, 14, 14, 256) 0 ['conv4_block5_1_bn[0][0]']
ation)
conv4_block5_2_conv (Conv2 (None, 14, 14, 256) 590080 ['conv4_block5_1_relu[0][0]']
D)
conv4_block5_2_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block5_2_conv[0][0]']
rmalization)
conv4_block5_2_relu (Activ (None, 14, 14, 256) 0 ['conv4_block5_2_bn[0][0]']
ation)
conv4_block5_3_conv (Conv2 (None, 14, 14, 1024) 263168 ['conv4_block5_2_relu[0][0]']
D)
conv4_block5_3_bn (BatchNo (None, 14, 14, 1024) 4096 ['conv4_block5_3_conv[0][0]']
rmalization)
conv4_block5_add (Add) (None, 14, 14, 1024) 0 ['conv4_block4_out[0][0]',
'conv4_block5_3_bn[0][0]']
conv4_block5_out (Activati (None, 14, 14, 1024) 0 ['conv4_block5_add[0][0]']
on)
conv4_block6_1_conv (Conv2 (None, 14, 14, 256) 262400 ['conv4_block5_out[0][0]']
D)
conv4_block6_1_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block6_1_conv[0][0]']
rmalization)
conv4_block6_1_relu (Activ (None, 14, 14, 256) 0 ['conv4_block6_1_bn[0][0]']
ation)
conv4_block6_2_conv (Conv2 (None, 14, 14, 256) 590080 ['conv4_block6_1_relu[0][0]']
D)
conv4_block6_2_bn (BatchNo (None, 14, 14, 256) 1024 ['conv4_block6_2_conv[0][0]']
rmalization)
conv4_block6_2_relu (Activ (None, 14, 14, 256) 0 ['conv4_block6_2_bn[0][0]']
ation)
conv4_block6_3_conv (Conv2 (None, 14, 14, 1024) 263168 ['conv4_block6_2_relu[0][0]']
D)
conv4_block6_3_bn (BatchNo (None, 14, 14, 1024) 4096 ['conv4_block6_3_conv[0][0]']
rmalization)
conv4_block6_add (Add) (None, 14, 14, 1024) 0 ['conv4_block5_out[0][0]',
'conv4_block6_3_bn[0][0]']
conv4_block6_out (Activati (None, 14, 14, 1024) 0 ['conv4_block6_add[0][0]']
on)
conv5_block1_1_conv (Conv2 (None, 7, 7, 512) 524800 ['conv4_block6_out[0][0]']
D)
conv5_block1_1_bn (BatchNo (None, 7, 7, 512) 2048 ['conv5_block1_1_conv[0][0]']
rmalization)
conv5_block1_1_relu (Activ (None, 7, 7, 512) 0 ['conv5_block1_1_bn[0][0]']
ation)
conv5_block1_2_conv (Conv2 (None, 7, 7, 512) 2359808 ['conv5_block1_1_relu[0][0]']
D)
conv5_block1_2_bn (BatchNo (None, 7, 7, 512) 2048 ['conv5_block1_2_conv[0][0]']
rmalization)
conv5_block1_2_relu (Activ (None, 7, 7, 512) 0 ['conv5_block1_2_bn[0][0]']
ation)
conv5_block1_0_conv (Conv2 (None, 7, 7, 2048) 2099200 ['conv4_block6_out[0][0]']
D)
conv5_block1_3_conv (Conv2 (None, 7, 7, 2048) 1050624 ['conv5_block1_2_relu[0][0]']
D)
conv5_block1_0_bn (BatchNo (None, 7, 7, 2048) 8192 ['conv5_block1_0_conv[0][0]']
rmalization)
conv5_block1_3_bn (BatchNo (None, 7, 7, 2048) 8192 ['conv5_block1_3_conv[0][0]']
rmalization)
conv5_block1_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_0_bn[0][0]',
'conv5_block1_3_bn[0][0]']
conv5_block1_out (Activati (None, 7, 7, 2048) 0 ['conv5_block1_add[0][0]']
on)
conv5_block2_1_conv (Conv2 (None, 7, 7, 512) 1049088 ['conv5_block1_out[0][0]']
D)
conv5_block2_1_bn (BatchNo (None, 7, 7, 512) 2048 ['conv5_block2_1_conv[0][0]']
rmalization)
conv5_block2_1_relu (Activ (None, 7, 7, 512) 0 ['conv5_block2_1_bn[0][0]']
ation)
conv5_block2_2_conv (Conv2 (None, 7, 7, 512) 2359808 ['conv5_block2_1_relu[0][0]']
D)
conv5_block2_2_bn (BatchNo (None, 7, 7, 512) 2048 ['conv5_block2_2_conv[0][0]']
rmalization)
conv5_block2_2_relu (Activ (None, 7, 7, 512) 0 ['conv5_block2_2_bn[0][0]']
ation)
conv5_block2_3_conv (Conv2 (None, 7, 7, 2048) 1050624 ['conv5_block2_2_relu[0][0]']
D)
conv5_block2_3_bn (BatchNo (None, 7, 7, 2048) 8192 ['conv5_block2_3_conv[0][0]']
rmalization)
conv5_block2_add (Add) (None, 7, 7, 2048) 0 ['conv5_block1_out[0][0]',
'conv5_block2_3_bn[0][0]']
conv5_block2_out (Activati (None, 7, 7, 2048) 0 ['conv5_block2_add[0][0]']
on)
conv5_block3_1_conv (Conv2 (None, 7, 7, 512) 1049088 ['conv5_block2_out[0][0]']
D)
conv5_block3_1_bn (BatchNo (None, 7, 7, 512) 2048 ['conv5_block3_1_conv[0][0]']
rmalization)
conv5_block3_1_relu (Activ (None, 7, 7, 512) 0 ['conv5_block3_1_bn[0][0]']
ation)
conv5_block3_2_conv (Conv2 (None, 7, 7, 512) 2359808 ['conv5_block3_1_relu[0][0]']
D)
conv5_block3_2_bn (BatchNo (None, 7, 7, 512) 2048 ['conv5_block3_2_conv[0][0]']
rmalization)
conv5_block3_2_relu (Activ (None, 7, 7, 512) 0 ['conv5_block3_2_bn[0][0]']
ation)
conv5_block3_3_conv (Conv2 (None, 7, 7, 2048) 1050624 ['conv5_block3_2_relu[0][0]']
D)
conv5_block3_3_bn (BatchNo (None, 7, 7, 2048) 8192 ['conv5_block3_3_conv[0][0]']
rmalization)
conv5_block3_add (Add) (None, 7, 7, 2048) 0 ['conv5_block2_out[0][0]',
'conv5_block3_3_bn[0][0]']
conv5_block3_out (Activati (None, 7, 7, 2048) 0 ['conv5_block3_add[0][0]']
on)
flatten_1 (Flatten) (None, 100352) 0 ['conv5_block3_out[0][0]']
dense_3 (Dense) (None, 512) 5138073 ['flatten_1[0][0]']
6
batch_normalization_2 (Bat (None, 512) 2048 ['dense_3[0][0]']
chNormalization)
activation_2 (Activation) (None, 512) 0 ['batch_normalization_2[0][0]'
]
dense_4 (Dense) (None, 16) 8208 ['activation_2[0][0]']
batch_normalization_3 (Bat (None, 16) 64 ['dense_4[0][0]']
chNormalization)
activation_3 (Activation) (None, 16) 0 ['batch_normalization_3[0][0]'
]
dense_5 (Dense) (None, 11) 187 ['activation_3[0][0]']
==================================================================================================
Total params: 74978955 (286.02 MB)
Trainable params: 74924779 (285.82 MB)
Non-trainable params: 54176 (211.62 KB)
__________________________________________________________________________________________________
In [31]:
def plot_model_history(history: Dict[str, list], title: str = "Model Performance") -> None:
"""
Plot the accuracy and loss history of a model.
Parameters:
- history (Dict[str, list]): Model training history containing 'accuracy', 'val_accuracy', 'loss', and 'val_loss'.
- title (str): Title of the plot. Default is "Model Performance".
Returns:
None
"""
# Plot accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history['val_accuracy'])
# Plot loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
# Set plot attributes
plt.title(title)
plt.ylabel("Accuracy / Loss")
plt.xlabel("Epoch")
plt.legend(["Accuracy", "Validation Accuracy", "Loss", "Validation Loss"])
plt.show()
# Example usage:
plot_model_history(history_vgg16, title="VGG16 Model Performance")
In [32]:
plot_model_history(history_resnet50, title="Resnet Model Performance")
In [33]:
def show_classification_report_and_confusion_matrix(model, valid_generator, STEP_SIZE_VALID, n_classes, artists_top_name):
"""
Display the classification report and confusion matrix for a given model and validation generator.
Parameters:
- model: The trained model.
- valid_generator: The validation generator.
- STEP_SIZE_VALID: Number of steps (batches) per validation epoch.
- n_classes: Number of output classes.
- artists_top_name: List of artist names.
Returns:
None
"""
# Loop on each generator batch and predict
y_pred, y_true = [], []
for _ in range(STEP_SIZE_VALID):
(X, y) = next(valid_generator)
y_pred.append(model.predict(X))
y_true.append(y)
# Create a flat list for y_true and y_pred
y_pred = [subresult for result in y_pred for subresult in result]
y_true = [subresult for result in y_true for subresult in result]
# Update Truth vector based on argmax
y_true = np.argmax(y_true, axis=1)
y_true = np.asarray(y_true).ravel()
# Update Prediction vector based on argmax
y_pred = np.argmax(y_pred, axis=1)
y_pred = np.asarray(y_pred).ravel()
# Confusion Matrix
fig, ax = plt.subplots(figsize=(10, 10))
conf_matrix = confusion_matrix(y_true, y_pred, labels=np.arange(n_classes))
conf_matrix = conf_matrix / np.sum(conf_matrix, axis=1)
sns.heatmap(conf_matrix, annot=True, fmt=".2f", square=True, cbar=False,
cmap=plt.cm.jet, xticklabels=artists_top_name, yticklabels=artists_top_name,
ax=ax)
ax.set_ylabel('Actual')
ax.set_xlabel('Predicted')
ax.set_title('Confusion Matrix')
plt.show()
# Classification Report
print('Classification Report:')
print(classification_report(y_true, y_pred, labels=np.arange(n_classes), target_names=artists_top_name))
show_classification_report_and_confusion_matrix(model_vgg16, validation_generator, STEP_SIZE_VALID, n_classes, artists_top_name)
1/1 [==============================] - 0s 370ms/step 1/1 [==============================] - 0s 26ms/step 1/1 [==============================] - 0s 31ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 26ms/step 1/1 [==============================] - 0s 25ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 22ms/step 1/1 [==============================] - 0s 24ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 25ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 28ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 26ms/step 1/1 [==============================] - 0s 24ms/step 1/1 [==============================] - 0s 22ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 24ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 26ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 25ms/step 1/1 [==============================] - 0s 35ms/step 1/1 [==============================] - 0s 36ms/step 1/1 [==============================] - 0s 32ms/step 1/1 [==============================] - 0s 40ms/step 1/1 [==============================] - 0s 32ms/step 1/1 [==============================] - 0s 31ms/step 1/1 [==============================] - 0s 24ms/step 1/1 [==============================] - 0s 24ms/step 1/1 [==============================] - 0s 22ms/step 1/1 [==============================] - 0s 22ms/step 1/1 [==============================] - 0s 26ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 22ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 24ms/step 1/1 [==============================] - 0s 26ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 22ms/step 1/1 [==============================] - 0s 28ms/step 1/1 [==============================] - 0s 23ms/step 1/1 [==============================] - 0s 25ms/step 1/1 [==============================] - 0s 22ms/step 1/1 [==============================] - 0s 24ms/step 1/1 [==============================] - 0s 27ms/step
Classification Report:
precision recall f1-score support
Vincent_van_Gogh 0.88 0.54 0.67 65
Edgar_Degas 1.00 0.02 0.04 49
Pablo_Picasso 0.56 0.60 0.58 131
Pierre-Auguste_Renoir 1.00 0.16 0.27 44
Albrecht_Dürer 0.88 0.29 0.43 49
Paul_Gauguin 0.29 0.89 0.44 92
Francisco_Goya 0.67 0.54 0.60 61
Rembrandt 0.42 0.91 0.58 74
Alfred_Sisley 0.82 0.54 0.65 61
Titian 0.74 0.48 0.58 42
Marc_Chagall 0.89 0.43 0.58 180
accuracy 0.53 848
macro avg 0.74 0.49 0.49 848
weighted avg 0.71 0.53 0.52 848
In [34]:
show_classification_report_and_confusion_matrix(model_resnet50, validation_generator, STEP_SIZE_VALID, n_classes, artists_top_name)
1/1 [==============================] - 1s 1s/step 1/1 [==============================] - 0s 47ms/step 1/1 [==============================] - 0s 42ms/step 1/1 [==============================] - 0s 41ms/step 1/1 [==============================] - 0s 42ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 30ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 31ms/step 1/1 [==============================] - 0s 29ms/step 1/1 [==============================] - 0s 30ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 29ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 29ms/step 1/1 [==============================] - 0s 30ms/step 1/1 [==============================] - 0s 28ms/step 1/1 [==============================] - 0s 28ms/step 1/1 [==============================] - 0s 28ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 32ms/step 1/1 [==============================] - 0s 44ms/step 1/1 [==============================] - 0s 52ms/step 1/1 [==============================] - 0s 40ms/step 1/1 [==============================] - 0s 43ms/step 1/1 [==============================] - 0s 37ms/step 1/1 [==============================] - 0s 28ms/step 1/1 [==============================] - 0s 28ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 30ms/step 1/1 [==============================] - 0s 28ms/step 1/1 [==============================] - 0s 29ms/step 1/1 [==============================] - 0s 31ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 35ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 32ms/step 1/1 [==============================] - 0s 31ms/step 1/1 [==============================] - 0s 30ms/step 1/1 [==============================] - 0s 27ms/step 1/1 [==============================] - 0s 33ms/step
Classification Report:
precision recall f1-score support
Vincent_van_Gogh 0.98 0.90 0.94 68
Edgar_Degas 0.91 0.65 0.76 48
Pablo_Picasso 0.70 0.95 0.80 128
Pierre-Auguste_Renoir 0.88 0.65 0.75 46
Albrecht_Dürer 0.97 0.79 0.87 48
Paul_Gauguin 0.79 0.89 0.84 89
Francisco_Goya 0.87 0.70 0.78 57
Rembrandt 0.87 0.83 0.85 75
Alfred_Sisley 0.88 0.83 0.85 60
Titian 0.81 0.77 0.79 44
Marc_Chagall 0.85 0.87 0.86 180
accuracy 0.83 843
macro avg 0.87 0.80 0.83 843
weighted avg 0.85 0.83 0.83 843
Prediction¶
In [35]:
def predict_artist(url: str, model: Model, train_input_shape: tuple[int, int, int]) -> None:
"""
Predict the artist of an image from a given URL using a pre-trained model.
Parameters:
- url (str): URL of the image to be predicted.
- model (Model): Pre-trained Keras model for artist prediction.
- train_input_shape (tuple): Input shape of the training data (height, width, channels).
Returns:
None
"""
# Read and preprocess the web image
web_image = imageio.imread(url)
web_image = cv2.resize(web_image, dsize=train_input_shape[0:2])
web_image = image.img_to_array(web_image)
web_image /= 255.
web_image = np.expand_dims(web_image, axis=0)
labels = train_generator.class_indices
labels = dict((v,k) for k,v in labels.items())
# Make predictions using the model
prediction = model.predict(web_image)
prediction_probability = np.amax(prediction)
prediction_idx = np.argmax(prediction)
# Map the index to the corresponding artist label
predicted_artist = labels[prediction_idx].replace('_', ' ')
# Display the prediction results
print("Predicted artist =", predicted_artist)
print("Prediction probability =", prediction_probability * 100, "%")
# Display the image
plt.imshow(imageio.imread(url))
plt.axis('off')
plt.show()
# Define the URL of the image
url = 'https://media.cnn.com/api/v1/images/stellar/prod/230215142139-02-banksy-valentines-day-mascara.jpg?q=w_2000,c_fill/f_webp'
# Call the function to predict the artist
predict_artist(url, model_vgg16, (224, 224, 3))
<ipython-input-35-6944bf7c5289>:15: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning disappear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly. web_image = imageio.imread(url)
1/1 [==============================] - 1s 521ms/step Predicted artist = Pablo Picasso Prediction probability = 90.51687717437744 %
<ipython-input-35-6944bf7c5289>:37: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning disappear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly. plt.imshow(imageio.imread(url))
In [36]:
predict_artist(url, model_resnet50, (224, 224, 3))
<ipython-input-35-6944bf7c5289>:15: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning disappear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly. web_image = imageio.imread(url)
1/1 [==============================] - 2s 2s/step Predicted artist = Pablo Picasso Prediction probability = 81.4846396446228 %
<ipython-input-35-6944bf7c5289>:37: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning disappear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly. plt.imshow(imageio.imread(url))
In [37]:
# Define the URL of the image
url = 'https://www.gpsmycity.com/img/gd/2081.jpg'
predict_artist(url, model_vgg16, (224, 224, 3))
<ipython-input-35-6944bf7c5289>:15: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning disappear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly. web_image = imageio.imread(url)
1/1 [==============================] - 0s 33ms/step Predicted artist = Pablo Picasso Prediction probability = 61.063218116760254 %
<ipython-input-35-6944bf7c5289>:37: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning disappear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly. plt.imshow(imageio.imread(url))
In [38]:
predict_artist(url, model_resnet50, (224, 224, 3))
<ipython-input-35-6944bf7c5289>:15: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning disappear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly. web_image = imageio.imread(url)
1/1 [==============================] - 0s 23ms/step Predicted artist = Titian Prediction probability = 84.05880331993103 %
<ipython-input-35-6944bf7c5289>:37: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning disappear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly. plt.imshow(imageio.imread(url))